x86/hvm: Don't unconditionally set up nested HVM state
authorTim Deegan <Tim.Deegan@citrix.com>
Thu, 7 Apr 2011 10:12:55 +0000 (11:12 +0100)
committerTim Deegan <Tim.Deegan@citrix.com>
Thu, 7 Apr 2011 10:12:55 +0000 (11:12 +0100)
Avoid setting up nested HVM state for domains that aren't going to use it.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/nestedhvm.c
xen/arch/x86/hvm/svm/nestedsvm.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/nestedhvm.h
xen/include/asm-x86/hvm/svm/nestedsvm.h

index 515614bbc81b2a14111178749905290e69a837f3..669330fbdad09d1119948a9f3c141864beb011cb 100644 (file)
@@ -967,18 +967,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
         goto fail2;
 
-    /* When I start the l1 guest with 'xm/xend' then HVM_PARAM_NESTEDHVM
-     * is already evaluated.
-     *
-     * When I start the l1 guest with 'xl' then HVM_PARAM_NESTEDHVM
-     * has not been evaluated yet so we have to initialise nested
-     * virtualization unconditionally here.
-     */
-    rc = nestedhvm_vcpu_initialise(v);
-    if ( rc < 0 ) {
-        printk("%s: nestedhvm_vcpu_initialise returned %i\n", __func__, rc);
+    if ( (rc = nestedhvm_vcpu_initialise(v)) < 0 )
         goto fail3;
-    }
 
     /* Create ioreq event channel. */
     rc = alloc_unbound_xen_event_channel(v, 0);
@@ -1046,11 +1036,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
-    int rc;
-
-    rc = nestedhvm_vcpu_destroy(v);
-    if (rc)
-       gdprintk(XENLOG_ERR, "nestedhvm_vcpu_destroy() failed with %i\n", rc);
+    nestedhvm_vcpu_destroy(v);
 
 #ifdef CONFIG_COMPAT
     free_compat_arg_xlat(v);
@@ -3436,6 +3422,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
                  */
                 if ( !paging_mode_hap(d) && a.value )
                     rc = -EINVAL;
+                /* Set up NHVM state for any vcpus that are already up */
+                if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+                    for_each_vcpu(d, v)
+                        if ( rc == 0 )
+                            rc = nestedhvm_vcpu_initialise(v);
                 break;
             }
 
@@ -4035,11 +4026,10 @@ int nhvm_vcpu_initialise(struct vcpu *v)
     return -EOPNOTSUPP;
 }
 
-int nhvm_vcpu_destroy(struct vcpu *v)
+void nhvm_vcpu_destroy(struct vcpu *v)
 {
-    if (hvm_funcs.nhvm_vcpu_destroy)
-        return hvm_funcs.nhvm_vcpu_destroy(v);
-    return -EOPNOTSUPP;
+    if ( hvm_funcs.nhvm_vcpu_destroy )
+        hvm_funcs.nhvm_vcpu_destroy(v);
 }
 
 int nhvm_vcpu_reset(struct vcpu *v)
index 601a0b9246e48d77df8a5de975552aef5ce77e93..298f34ae1b20831e1f7461c20d5db4ae558b0a03 100644 (file)
@@ -33,12 +33,8 @@ nestedhvm_enabled(struct domain *d)
     bool_t enabled;
 
     enabled = !!(d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM]);
-    /* sanity check */
     BUG_ON(enabled && !is_hvm_domain(d));
-
-    if (!is_hvm_domain(d))
-        return 0;
-
+
     return enabled;
 }
 
@@ -78,8 +74,11 @@ nestedhvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
-    rc = nhvm_vcpu_initialise(v); 
-    if (rc) {
+    if ( !nestedhvm_enabled(v->domain) )
+        return 0;
+
+    if ( (rc = nhvm_vcpu_initialise(v)) )
+    {
         nhvm_vcpu_destroy(v);
         return rc;
     }
@@ -88,13 +87,11 @@ nestedhvm_vcpu_initialise(struct vcpu *v)
     return 0;
 }
 
-int
+void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if (!nestedhvm_enabled(v->domain))
-        return 0;
-
-    return nhvm_vcpu_destroy(v);
+    if ( nestedhvm_enabled(v->domain) )
+        nhvm_vcpu_destroy(v);
 }
 
 static void
index 7c9f2d0bba1ed3a4b51755db82ca280a0dca921b..425112a37ee3994d9524e9131e4766048538b6a0 100644 (file)
@@ -112,7 +112,7 @@ err:
     return -ENOMEM;
 }
 
-int nsvm_vcpu_destroy(struct vcpu *v)
+void nsvm_vcpu_destroy(struct vcpu *v)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
@@ -134,8 +134,6 @@ int nsvm_vcpu_destroy(struct vcpu *v)
     }
     if (svm->ns_iomap)
         svm->ns_iomap = NULL;
-
-    return 0;
 }
 
 int nsvm_vcpu_reset(struct vcpu *v)
index 0c80f43d2185062d6d74f8ac5e67b835d78b05d0..1445fd982ee4e7b3c2bf60314937deb2e318e940 100644 (file)
@@ -148,7 +148,7 @@ struct hvm_function_table {
 
     /* Nested HVM */
     int (*nhvm_vcpu_initialise)(struct vcpu *v);
-    int (*nhvm_vcpu_destroy)(struct vcpu *v);
+    void (*nhvm_vcpu_destroy)(struct vcpu *v);
     int (*nhvm_vcpu_reset)(struct vcpu *v);
     int (*nhvm_vcpu_hostrestore)(struct vcpu *v,
                                 struct cpu_user_regs *regs);
@@ -415,7 +415,7 @@ static inline int hvm_memory_event_int3(unsigned long gla)
 /* Initialize vcpu's struct nestedhvm */
 int nhvm_vcpu_initialise(struct vcpu *v);
 /* Destroy and free vcpu's struct nestedhvm */
-int nhvm_vcpu_destroy(struct vcpu *v);
+void nhvm_vcpu_destroy(struct vcpu *v);
 /* Reset vcpu's state when l1 guest disables nested virtualization */
 int nhvm_vcpu_reset(struct vcpu *v);
 /* Restores l1 guest state */
index dc7c0cc1e789814b0020ff1317ef6a2cfa729063..123aab3fe0b4cfc03e0812c1f1ed98fe7ff4b49f 100644 (file)
@@ -38,7 +38,7 @@ bool_t nestedhvm_enabled(struct domain *d);
 
 /* Nested VCPU */
 int nestedhvm_vcpu_initialise(struct vcpu *v);
-int nestedhvm_vcpu_destroy(struct vcpu *v);
+void nestedhvm_vcpu_destroy(struct vcpu *v);
 void nestedhvm_vcpu_reset(struct vcpu *v);
 bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 #define nestedhvm_vcpu_enter_guestmode(v) \
index 876802e521c05ac07e1f945b6711b9e322486c24..51248315bb3b4754ad0fb9d974c0fdcc2adca11d 100644 (file)
@@ -100,7 +100,7 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
     uint64_t exitcode);
 
 /* Interface methods */
-int nsvm_vcpu_destroy(struct vcpu *v);
+void nsvm_vcpu_destroy(struct vcpu *v);
 int nsvm_vcpu_initialise(struct vcpu *v);
 int nsvm_vcpu_reset(struct vcpu *v);
 int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);